webscout 7.4-py3-none-any.whl → 7.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (42)
  1. webscout/Provider/C4ai.py +414 -0
  2. webscout/Provider/Cloudflare.py +18 -21
  3. webscout/Provider/DeepSeek.py +3 -32
  4. webscout/Provider/Deepinfra.py +30 -21
  5. webscout/Provider/GithubChat.py +362 -0
  6. webscout/Provider/HeckAI.py +20 -3
  7. webscout/Provider/HuggingFaceChat.py +462 -0
  8. webscout/Provider/Marcus.py +7 -50
  9. webscout/Provider/Netwrck.py +6 -53
  10. webscout/Provider/Phind.py +29 -3
  11. webscout/Provider/TTI/aiarta/__init__.py +2 -0
  12. webscout/Provider/TTI/aiarta/async_aiarta.py +482 -0
  13. webscout/Provider/TTI/aiarta/sync_aiarta.py +409 -0
  14. webscout/Provider/Venice.py +200 -200
  15. webscout/Provider/Youchat.py +1 -1
  16. webscout/Provider/__init__.py +13 -2
  17. webscout/Provider/akashgpt.py +8 -5
  18. webscout/Provider/copilot.py +416 -0
  19. webscout/Provider/flowith.py +181 -0
  20. webscout/Provider/granite.py +17 -53
  21. webscout/Provider/llamatutor.py +6 -46
  22. webscout/Provider/llmchat.py +7 -46
  23. webscout/Provider/multichat.py +29 -91
  24. webscout/exceptions.py +19 -9
  25. webscout/update_checker.py +55 -93
  26. webscout/version.py +1 -1
  27. webscout-7.5.dist-info/LICENSE.md +146 -0
  28. {webscout-7.4.dist-info → webscout-7.5.dist-info}/METADATA +5 -126
  29. {webscout-7.4.dist-info → webscout-7.5.dist-info}/RECORD +32 -33
  30. webscout/Local/__init__.py +0 -10
  31. webscout/Local/_version.py +0 -3
  32. webscout/Local/formats.py +0 -747
  33. webscout/Local/model.py +0 -1368
  34. webscout/Local/samplers.py +0 -125
  35. webscout/Local/thread.py +0 -539
  36. webscout/Local/ui.py +0 -401
  37. webscout/Local/utils.py +0 -388
  38. webscout/Provider/dgaf.py +0 -214
  39. webscout-7.4.dist-info/LICENSE.md +0 -211
  40. {webscout-7.4.dist-info → webscout-7.5.dist-info}/WHEEL +0 -0
  41. {webscout-7.4.dist-info → webscout-7.5.dist-info}/entry_points.txt +0 -0
  42. {webscout-7.4.dist-info → webscout-7.5.dist-info}/top_level.txt +0 -0
webscout/Provider/C4ai.py (new file)
@@ -0,0 +1,414 @@
+ import requests
+ import uuid
+ import json
+ import time
+ import random
+ import re
+ from typing import Any, Dict, List, Optional, Union, Generator
+
+ from webscout.AIutel import Conversation
+ from webscout.AIbase import Provider
+ from webscout import exceptions
+ from webscout import LitAgent
+
+ class C4ai(Provider):
+     """
+     A class to interact with the Hugging Face Chat API.
+     """
+     # Default available models
+     AVAILABLE_MODELS = [] # Placeholder for available models, It will be updated in the constructor
+
+     def __repr__(self) -> str:
+         return f"C4ai({self.model})"
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 2000,
+         timeout: int = 60,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         model: str = "command-a-03-2025",
+         system_prompt: str = "You are a helpful assistant.",
+     ):
+         """Initialize the C4ai client."""
+         self.url = "https://cohereforai-c4ai-command.hf.space"
+         self.session = requests.Session()
+         self.session.proxies.update(proxies)
+
+         # Set up headers for all requests
+         self.headers = {
+             "Content-Type": "application/json",
+             "User-Agent": LitAgent().random(),
+             "Accept": "*/*",
+             "Accept-Encoding": "gzip, deflate, br, zstd",
+             "Accept-Language": "en-US,en;q=0.9",
+             "Origin": "https://cohereforai-c4ai-command.hf.space",
+             "Referer": "https://cohereforai-c4ai-command.hf.space/",
+             "Sec-Ch-Ua": "\"Chromium\";v=\"120\"",
+             "Sec-Ch-Ua-Mobile": "?0",
+             "Sec-Ch-Ua-Platform": "\"Windows\"",
+             "Sec-Fetch-Dest": "empty",
+             "Sec-Fetch-Mode": "cors",
+             "Sec-Fetch-Site": "same-origin",
+             "DNT": "1",
+             "Priority": "u=1, i"
+         }
+
+         # Update available models
+         self.update_available_models()
+
+         # Set default model if none provided
+         self.model = model
+         # Provider settings
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.timeout = timeout
+         self.last_response = {}
+
+         # Initialize a simplified conversation history for file saving only
+         self.conversation = Conversation(is_conversation, max_tokens, filepath, update_file)
+
+         # Store conversation data for different models
+         self._conversation_data = {}
+         self.preprompt = system_prompt
+
+     def update_available_models(self):
+         """Update the available models list from HuggingFace"""
+         try:
+             models = self.get_models()
+             if models and len(models) > 0:
+                 self.AVAILABLE_MODELS = models
+         except Exception:
+             # Fallback to default models list if fetching fails
+             pass
+
+     @classmethod
+     def get_models(cls):
+         """Fetch available models from HuggingFace."""
+         try:
+             response = requests.get("https://cohereforai-c4ai-command.hf.space/")
+             text = response.text
+             models_match = re.search(r'models:(\[.+?\]),oldModels:', text)
+
+             if not models_match:
+                 return cls.AVAILABLE_MODELS
+
+             models_text = models_match.group(1)
+             models_text = re.sub(r',parameters:{[^}]+?}', '', models_text)
+             models_text = models_text.replace('void 0', 'null')
+
+             def add_quotation_mark(match):
+                 return f'{match.group(1)}"{match.group(2)}":'
+
+             models_text = re.sub(r'([{,])([A-Za-z0-9_]+?):', add_quotation_mark, models_text)
+
+             models_data = json.loads(models_text)
+             # print([model["id"] for model in models_data])
+             return [model["id"] for model in models_data]
+         except Exception:
+             return cls.AVAILABLE_MODELS
+
+     def create_conversation(self, model: str):
+         """Create a new conversation with the specified model."""
+         url = "https://cohereforai-c4ai-command.hf.space/conversation"
+         payload = {"model": model, "preprompt": self.preprompt,}
+
+         # Update referer for this specific request
+         headers = self.headers.copy()
+         headers["Referer"] = f"https://cohereforai-c4ai-command.hf.space/"
+
+         try:
+             response = self.session.post(url, json=payload, headers=headers)
+
+             if response.status_code == 401:
+                 raise exceptions.AuthenticationError("Authentication failed.")
+
+             # Handle other error codes
+             if response.status_code != 200:
+                 return None
+
+             data = response.json()
+             conversation_id = data.get("conversationId")
+
+             # Store conversation data
+             if model not in self._conversation_data:
+                 self._conversation_data[model] = {
+                     "conversationId": conversation_id,
+                     "messageId": str(uuid.uuid4()) # Initial message ID
+                 }
+
+             return conversation_id
+         except requests.exceptions.RequestException:
+             return None
+
+     def fetch_message_id(self, conversation_id: str) -> str:
+         """Fetch the latest message ID for a conversation."""
+         try:
+             url = f"https://cohereforai-c4ai-command.hf.space/conversation/{conversation_id}/__data.json?x-sveltekit-invalidated=11"
+             response = self.session.get(url, headers=self.headers)
+             response.raise_for_status()
+
+             # Parse the JSON data from the response
+             json_data = None
+             for line in response.text.split('\n'):
+                 if line.strip():
+                     try:
+                         parsed = json.loads(line)
+                         if isinstance(parsed, dict) and "nodes" in parsed:
+                             json_data = parsed
+                             break
+                     except json.JSONDecodeError:
+                         continue
+
+             if not json_data:
+                 # Fall back to a UUID if we can't parse the response
+                 return str(uuid.uuid4())
+
+             # Extract message ID using the same pattern as in the example
+             if json_data.get("nodes", []) and json_data["nodes"][-1].get("type") == "error":
+                 return str(uuid.uuid4())
+
+             data = json_data["nodes"][1]["data"]
+             keys = data[data[0]["messages"]]
+             message_keys = data[keys[-1]]
+             message_id = data[message_keys["id"]]
+
+             return message_id
+
+         except Exception:
+             # Fall back to a UUID if there's an error
+             return str(uuid.uuid4())
+
+     def generate_boundary(self):
+         """Generate a random boundary for multipart/form-data requests"""
+         boundary_chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
+         boundary = "----WebKitFormBoundary"
+         boundary += "".join(random.choice(boundary_chars) for _ in range(16))
+         return boundary
+
+     def process_response(self, response, prompt: str):
+         """Process streaming response and extract content."""
+         full_text = ""
+         sources = None
+         reasoning_text = ""
+         has_reasoning = False
+
+         for line in response.iter_lines(decode_unicode=True):
+             if not line:
+                 continue
+
+             try:
+                 # Parse each line as JSON
+                 data = json.loads(line)
+
+                 # Handle different response types
+                 if "type" not in data:
+                     continue
+
+                 if data["type"] == "stream" and "token" in data:
+                     token = data["token"].replace("\u0000", "")
+                     full_text += token
+                     resp = {"text": token}
+                     yield resp
+                 elif data["type"] == "finalAnswer":
+                     final_text = data.get("text", "")
+                     if final_text and not full_text:
+                         full_text = final_text
+                         resp = {"text": final_text}
+                         yield resp
+                 elif data["type"] == "webSearch" and "sources" in data:
+                     sources = data["sources"]
+                 elif data["type"] == "reasoning":
+                     has_reasoning = True
+                     if data.get("subtype") == "stream" and "token" in data:
+                         reasoning_text += data["token"]
+                     # elif data.get("subtype") == "status":
+                     #     # For status updates in reasoning, we can just append them as a comment
+                     #     if data.get("status"):
+                     #         reasoning_text += f"\n# {data['status']}"
+
+                 # If we have reasoning, prepend it to the next text output
+                 if reasoning_text and not full_text:
+                     resp = {"text": f"<think>\n{reasoning_text}\n</think>\n", "is_reasoning": True}
+                     yield resp
+
+             except json.JSONDecodeError:
+                 continue
+
+         # Update conversation history only for saving to file if needed
+         if full_text and self.conversation.file:
+             if has_reasoning:
+                 full_text_with_reasoning = f"<think>\n{reasoning_text}\n</think>\n{full_text}"
+                 self.last_response = {"text": full_text_with_reasoning}
+                 self.conversation.update_chat_history(prompt, full_text_with_reasoning)
+             else:
+                 self.last_response = {"text": full_text}
+                 self.conversation.update_chat_history(prompt, full_text)
+
+         return full_text
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+         web_search: bool = False,
+     ) -> Union[Dict[str, Any], Generator]:
+         """Send a message to the HuggingFace Chat API"""
+         model = self.model
+
+         # Check if we have a conversation for this model
+         if model not in self._conversation_data:
+             conversation_id = self.create_conversation(model)
+             if not conversation_id:
+                 raise exceptions.FailedToGenerateResponseError(f"Failed to create conversation with model {model}")
+         else:
+             conversation_id = self._conversation_data[model]["conversationId"]
+             # Refresh message ID
+             self._conversation_data[model]["messageId"] = self.fetch_message_id(conversation_id)
+
+         url = f"https://cohereforai-c4ai-command.hf.space/conversation/{conversation_id}"
+         message_id = self._conversation_data[model]["messageId"]
+
+         # Data to send - use the prompt directly without generating a complete prompt
+         # since HuggingFace maintains conversation state internally
+         request_data = {
+             "inputs": prompt,
+             "id": message_id,
+             "is_retry": False,
+             "is_continue": False,
+             "web_search": web_search,
+             "tools": ["66e85bb396d054c5771bc6cb", "00000000000000000000000a"]
+         }
+
+         # Update headers for this specific request
+         headers = self.headers.copy()
+         headers["Referer"] = f"https://cohereforai-c4ai-command.hf.space/conversation/{conversation_id}"
+
+         # Create multipart form data
+         boundary = self.generate_boundary()
+         multipart_headers = headers.copy()
+         multipart_headers["Content-Type"] = f"multipart/form-data; boundary={boundary}"
+
+         # Serialize the data to JSON
+         data_json = json.dumps(request_data, separators=(',', ':'))
+
+         # Create the multipart form data body
+         body = f"--{boundary}\r\n"
+         body += f'Content-Disposition: form-data; name="data"\r\n'
+         body += f"Content-Type: application/json\r\n\r\n"
+         body += f"{data_json}\r\n"
+         body += f"--{boundary}--\r\n"
+
+         multipart_headers["Content-Length"] = str(len(body))
+
+         def for_stream():
+             try:
+                 # Try with multipart/form-data first
+                 response = None
+                 try:
+                     response = self.session.post(
+                         url,
+                         data=body,
+                         headers=multipart_headers,
+                         stream=True,
+                         timeout=self.timeout
+                     )
+                 except requests.exceptions.RequestException:
+                     pass
+
+                 # If multipart fails or returns error, try with regular JSON
+                 if not response or response.status_code != 200:
+                     response = self.session.post(
+                         url,
+                         json=request_data,
+                         headers=headers,
+                         stream=True,
+                         timeout=self.timeout
+                     )
+
+                 # If both methods fail, raise exception
+                 if response.status_code != 200:
+                     raise exceptions.FailedToGenerateResponseError(f"Request failed with status code {response.status_code}")
+
+                 # Process the streaming response
+                 yield from self.process_response(response, prompt)
+
+             except Exception as e:
+                 if isinstance(e, requests.exceptions.RequestException):
+                     if hasattr(e, 'response') and e.response is not None:
+                         status_code = e.response.status_code
+                         if status_code == 401:
+                             raise exceptions.AuthenticationError("Authentication failed.")
+
+                 # Try another model if current one fails
+                 if len(self.AVAILABLE_MODELS) > 1:
+                     current_model_index = self.AVAILABLE_MODELS.index(self.model) if self.model in self.AVAILABLE_MODELS else 0
+                     next_model_index = (current_model_index + 1) % len(self.AVAILABLE_MODELS)
+                     self.model = self.AVAILABLE_MODELS[next_model_index]
+
+                     # Create new conversation with the alternate model
+                     conversation_id = self.create_conversation(self.model)
+                     if conversation_id:
+                         # Try again with the new model
+                         yield from self.ask(prompt, stream=True, raw=raw, optimizer=optimizer,
+                                             conversationally=conversationally, web_search=web_search)
+                         return
+
+                 # If we get here, all models failed
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
+
+         def for_non_stream():
+             response_text = ""
+             for response in for_stream():
+                 if "text" in response:
+                     response_text += response["text"]
+             self.last_response = {"text": response_text}
+             return self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+         web_search: bool = False
+     ) -> Union[str, Generator]:
+         """Generate a response to a prompt"""
+         def for_stream():
+             for response in self.ask(
+                 prompt, True, optimizer=optimizer, conversationally=conversationally, web_search=web_search
+             ):
+                 yield self.get_message(response)
+
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(
+                     prompt, False, optimizer=optimizer, conversationally=conversationally, web_search=web_search
+                 )
+             )
+
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         """Extract message text from response"""
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response.get("text", "")
+
+ if __name__ == "__main__":
+     # Simple test code
+     from rich import print
+     try:
+         ai = C4ai()
+         response = ai.chat("How's it going?", stream=True, web_search=False)
+         for chunk in response:
+             print(chunk, end="", flush=True)
+         print()
+     except Exception as e:
+         print(f"An error occurred: {e}")
webscout/Provider/Cloudflare.py
@@ -9,12 +9,10 @@ from webscout import exceptions
  from typing import Any, AsyncGenerator, Dict
  import cloudscraper
  from webscout import LitAgent
- from webscout.Litlogger import Logger, LogFormat

  class Cloudflare(Provider):
      """
      Cloudflare provider to interact with Cloudflare's text generation API.
-     Includes logging capabilities using Logger and uses LitAgent for user-agent.
      """

      # Updated AVAILABLE_MODELS from given JSON data
@@ -72,7 +70,6 @@ class Cloudflare(Provider):
          act: str = None,
          model: str = "@cf/deepseek-ai/deepseek-r1-distill-qwen-32b",
          system_prompt: str = "You are a helpful assistant.",
-         logging: bool = False
      ):
          """Instantiates Cloudflare Provider

@@ -145,13 +142,13 @@ class Cloudflare(Provider):
          self.conversation.history_offset = history_offset

          # Initialize logger if logging is enabled
-         self.logger = Logger(
-             name="Cloudflare",
-             format=LogFormat.MODERN_EMOJI,
-         ) if logging else None
+         # self.logger = Logger(
+         #     name="Cloudflare",
+         #     format=LogFormat.MODERN_EMOJI,
+         # ) if logging else None

-         if self.logger:
-             self.logger.info("Cloudflare initialized successfully")
+         # if self.logger:
+         #     self.logger.info("Cloudflare initialized successfully")

      def ask(
          self,
@@ -178,11 +175,11 @@ class Cloudflare(Provider):
                  conversation_prompt = getattr(Optimizers, optimizer)(
                      conversation_prompt if conversationally else prompt
                  )
-                 if self.logger:
-                     self.logger.debug(f"Applied optimizer: {optimizer}")
+                 # if self.logger:
+                 #     self.logger.debug(f"Applied optimizer: {optimizer}")
              else:
-                 if self.logger:
-                     self.logger.error(f"Invalid optimizer requested: {optimizer}")
+                 # if self.logger:
+                 #     self.logger.error(f"Invalid optimizer requested: {optimizer}")
                  raise Exception(f"Optimizer is not one of {list(self.__available_optimizers)}")

          payload = {
@@ -192,13 +189,13 @@ class Cloudflare(Provider):
              ],
              "lora": None,
              "model": self.model,
-             "max_tokens": 512,
+             "max_tokens": self.max_tokens_to_sample,
              "stream": True
          }

          def for_stream():
-             if self.logger:
-                 self.logger.debug("Sending streaming request to Cloudflare API...")
+             # if self.logger:
+             #     self.logger.debug("Sending streaming request to Cloudflare API...")
              response = self.scraper.post(
                  self.chat_endpoint,
                  headers=self.headers,
@@ -208,8 +205,8 @@ class Cloudflare(Provider):
                  timeout=self.timeout
              )
              if not response.ok:
-                 if self.logger:
-                     self.logger.error(f"Request failed: ({response.status_code}, {response.reason})")
+                 # if self.logger:
+                 #     self.logger.error(f"Request failed: ({response.status_code}, {response.reason})")
                  raise exceptions.FailedToGenerateResponseError(
                      f"Failed to generate response - ({response.status_code}, {response.reason})"
                  )
@@ -222,8 +219,8 @@ class Cloudflare(Provider):
                  yield content if raw else dict(text=content)
              self.last_response.update(dict(text=streaming_response))
              self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
-             if self.logger:
-                 self.logger.info("Streaming response completed successfully")
+             # if self.logger:
+             #     self.logger.info("Streaming response completed successfully")

          def for_non_stream():
              for _ in for_stream():
@@ -269,7 +266,7 @@ class Cloudflare(Provider):

  if __name__ == '__main__':
      from rich import print
-     ai = Cloudflare(timeout=5000, logging=True)
+     ai = Cloudflare(timeout=5000)
      response = ai.chat("write a poem about AI", stream=True)
      for chunk in response:
          print(chunk, end="", flush=True)
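
Beyond commenting out the logger calls, the substantive change in these hunks is that the payload's "max_tokens" now comes from self.max_tokens_to_sample instead of being hard-coded to 512. A hedged sketch of what that enables, assuming the constructor accepts a max_tokens argument that sets max_tokens_to_sample, as the other providers in this diff do (the full signature is not shown here):

from webscout.Provider.Cloudflare import Cloudflare

# max_tokens (assumed constructor argument) now flows into the request payload
ai = Cloudflare(max_tokens=1024, timeout=60)
for chunk in ai.chat("write a poem about AI", stream=True):
    print(chunk, end="", flush=True)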
webscout/Provider/DeepSeek.py
@@ -1,13 +1,11 @@
-
  import requests
  import json
- from typing import Any, Dict, Optional, Generator
+ from typing import Any, Dict, Generator
  from webscout.AIutel import Optimizers
  from webscout.AIutel import Conversation
  from webscout.AIutel import AwesomePrompts
  from webscout.AIbase import Provider
  from webscout import exceptions
- from webscout.Litlogger import Logger, LogFormat
  from webscout import LitAgent as Lit

  class DeepSeek(Provider):
@@ -33,8 +31,7 @@ class DeepSeek(Provider):
          history_offset: int = 10250,
          act: str = None,
          model: str = "deepseek-r1", # Default model
-         system_prompt: str = "You are a helpful AI assistant.",
-         logging: bool = False
+         system_prompt: str = "You are a helpful AI assistant."
      ):
          """
          Initializes the DeepSeek AI API with given parameters.
@@ -42,15 +39,6 @@ class DeepSeek(Provider):
          if model not in self.AVAILABLE_MODELS:
              raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS.keys()}")

-         # Initialize logging
-         self.logger = Logger(
-             name="DeepSeek",
-             format=LogFormat.MODERN_EMOJI,
-         ) if logging else None
-
-         if self.logger:
-             self.logger.info(f"Initializing DeepSeek with model: {model}")
-
          self.session = requests.Session()
          self.is_conversation = is_conversation
          self.max_tokens_to_sample = max_tokens
@@ -93,20 +81,13 @@ class DeepSeek(Provider):
          conversationally: bool = False,
      ) -> Dict[str, Any]:
          """Chat with AI"""
-         if self.logger:
-             self.logger.debug(f"Processing request - Prompt: {prompt[:50]}...")
-
          conversation_prompt = self.conversation.gen_complete_prompt(prompt)
          if optimizer:
              if optimizer in self.__available_optimizers:
                  conversation_prompt = getattr(Optimizers, optimizer)(
                      conversation_prompt if conversationally else prompt
                  )
-                 if self.logger:
-                     self.logger.debug(f"Applied optimizer: {optimizer}")
              else:
-                 if self.logger:
-                     self.logger.error(f"Invalid optimizer: {optimizer}")
                  raise Exception(
                      f"Optimizer is not one of {self.__available_optimizers}"
                  )
@@ -122,13 +103,9 @@ class DeepSeek(Provider):
          }

          def for_stream():
-             if self.logger:
-                 self.logger.debug("Sending streaming request to DeepInfra API...")
              try:
                  with requests.post(self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout) as response:
                      if response.status_code != 200:
-                         if self.logger:
-                             self.logger.error(f"Request failed with status code {response.status_code}")
                          raise exceptions.FailedToGenerateResponseError(
                              f"Request failed with status code {response.status_code}"
                          )
@@ -151,18 +128,12 @@ class DeepSeek(Provider):
                              resp = {"text": content}
                              yield resp if raw else resp
                          except json.JSONDecodeError:
-                             if self.logger:
-                                 self.logger.error("JSON decode error in streaming data")
                              continue

                      self.last_response = {"text": streaming_text}
                      self.conversation.update_chat_history(prompt, streaming_text)
-                     if self.logger:
-                         self.logger.info("Streaming response completed successfully")

              except requests.RequestException as e:
-                 if self.logger:
-                     self.logger.error(f"Request failed: {e}")
                  raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")

          def for_non_stream():
@@ -207,7 +178,7 @@ if __name__ == "__main__":
      from rich import print

      # Example usage
-     ai = DeepSeek(system_prompt="You are an expert AI assistant.", logging=True)
+     ai = DeepSeek(system_prompt="You are an expert AI assistant.")

      try:
          # Send a prompt and stream the response
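
Note that the logging parameter is removed outright rather than deprecated, so 7.4-style calls that passed it will break on upgrade. A small compatibility sketch, assuming only the constructor signature shown in this diff:

from webscout.Provider.DeepSeek import DeepSeek

ai = DeepSeek(system_prompt="You are an expert AI assistant.")  # 7.5 style

# The 7.4-style call would now fail at construction time:
# ai = DeepSeek(system_prompt="...", logging=True)
# TypeError: unexpected keyword argument 'logging'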